SVM patch to fix guest time, including 64-bit MSR code — allowing 64-bit
Linux guests to enable the APIC (i.e. apic=1 now works in the guest config file).

author    kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
          Thu, 23 Mar 2006 09:50:34 +0000 (10:50 +0100)
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
          Thu, 23 Mar 2006 09:50:34 +0000 (10:50 +0100)

Signed-off-by: Tom Woller <thomas.woller@amd.com>
xen/arch/x86/hvm/svm/intr.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/svm/vmcb.c
xen/include/asm-x86/hvm/svm/svm.h

index 86fc6e4b9d1050b557d0b78373af680a3fd3a245..46de3cd40abf80f3d3415f0a3e33e466688505b1 100644 (file)
  */
 #define BSP_CPU(v)    (!(v->vcpu_id))
 
-static inline int svm_inject_extint(struct vcpu *v, int trap, int error_code)
+u64 svm_get_guest_time(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    vintr_t intr;
-
-    ASSERT(vmcb);
-
-    /* Save all fields */
-    intr = vmcb->vintr;
-    /* Update only relevant fields */    
-    intr.fields.irq = 1;
-    intr.fields.intr_masking = 1;
-    intr.fields.vector = trap;
-    intr.fields.prio = 0xF;
-    intr.fields.ign_tpr = 1;
-    vmcb->vintr = intr;
-//  printf( "IRQ = %d\n", trap );
-    return 0;
+    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
+    u64    host_tsc;
+    
+    rdtscll(host_tsc);
+    return host_tsc + vpit->cache_tsc_offset;
 }
 
-void svm_set_tsc_shift(struct vcpu *v, struct hvm_virpit *vpit)
+void svm_set_guest_time(struct vcpu *v, u64 gtime)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    u64    drift;
-
-    if ( vpit->first_injected )
-        drift = vpit->period_cycles * vpit->pending_intr_nr;
-    else
-        drift = 0;
-    vmcb->tsc_offset = ( 0 - drift );
+    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
+    u64    host_tsc;
+   
+    rdtscll(host_tsc);
+    
+    vpit->cache_tsc_offset = gtime - host_tsc;
+    v->arch.hvm_svm.vmcb->tsc_offset = vpit->cache_tsc_offset;
 }
 
 static inline void
@@ -82,15 +70,19 @@ interrupt_post_injection(struct vcpu * v, int vector, int type)
     struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
 
     if ( is_pit_irq(v, vector, type) ) {
-            if ( !vpit->first_injected ) {
-                vpit->first_injected = 1;
-                vpit->pending_intr_nr = 0;
-            }
-            else if (vpit->pending_intr_nr) {
-                --vpit->pending_intr_nr;
-            }
-            vpit->inject_point = NOW();
-            svm_set_tsc_shift (v, vpit);
+        if ( !vpit->first_injected ) {
+            vpit->pending_intr_nr = 0;
+            vpit->last_pit_gtime = svm_get_guest_time(v);
+            vpit->scheduled = NOW() + vpit->period;
+            set_timer(&vpit->pit_timer, vpit->scheduled);
+            vpit->first_injected = 1;
+        } else {
+            vpit->pending_intr_nr--;
+        }
+        vpit->inject_point = NOW();
+
+        vpit->last_pit_gtime += vpit->period;
+        svm_set_guest_time(v, vpit->last_pit_gtime);
     }
 
     switch(type)
@@ -100,9 +92,30 @@ interrupt_post_injection(struct vcpu * v, int vector, int type)
 
     default:
         vlapic_post_injection(v, vector, type);
+        break;
     }
 }
 
+static inline int svm_inject_extint(struct vcpu *v, int trap, int error_code)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    vintr_t intr;
+
+    ASSERT(vmcb);
+
+    /* Save all fields */
+    intr = vmcb->vintr;
+    /* Update only relevant fields */    
+    intr.fields.irq = 1;
+    intr.fields.intr_masking = 1;
+    intr.fields.vector = trap;
+    intr.fields.prio = 0xF;
+    intr.fields.ign_tpr = 1;
+    vmcb->vintr = intr;
+//  printf( "IRQ = %d\n", trap );
+    return 0;
+}
+
 asmlinkage void svm_intr_assist(void) 
 {
     struct vcpu *v = current;
index 4e6965feac4106418c850a5f9ad5700d438344a5..42bda15b38e761c5b5fc65a32194ff34ea8aa2c1 100644 (file)
@@ -670,8 +670,18 @@ static void arch_svm_do_launch(struct vcpu *v)
     reset_stack_and_jump(svm_asm_do_launch);
 }
 
+static void svm_freeze_time(struct vcpu *v)
+{
+    struct hvm_virpit *vpit = &v->domain->arch.hvm_domain.vpit;
+    
+    v->domain->arch.hvm_domain.guest_time = svm_get_guest_time(v);
+    if ( vpit->first_injected )
+        stop_timer(&(vpit->pit_timer));
+}
+
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
+    svm_freeze_time(v);
 }
 
 static void svm_ctxt_switch_to(struct vcpu *v)
@@ -911,7 +921,7 @@ static void svm_vmexit_do_cpuid(struct vmcb_struct *vmcb, unsigned long input,
 
     if (input == 1)
     {
-        if ( hvm_apic_support(v->domain) &&
+        if ( !hvm_apic_support(v->domain) ||
                 !vlapic_global_enabled((VLAPIC(v))) )
             clear_bit(X86_FEATURE_APIC, &edx);
            
@@ -1693,7 +1703,7 @@ static inline void svm_do_msr_access(struct vcpu *v, struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int  inst_len;
-    int64_t tsc_sum;
+    u64 msr_content=0;
 
     ASSERT(vmcb);
 
@@ -1708,24 +1718,27 @@ static inline void svm_do_msr_access(struct vcpu *v, struct cpu_user_regs *regs)
         inst_len = __get_instruction_length(vmcb, INSTR_RDMSR, NULL);
 
         regs->edx = 0;
-        switch (regs->ecx)
+        switch (regs->ecx) {
+        case MSR_IA32_TIME_STAMP_COUNTER:
         {
+            struct hvm_virpit *vpit;
+
+            rdtscll(msr_content);
+            vpit = &(v->domain->arch.hvm_domain.vpit);
+            msr_content += vpit->cache_tsc_offset;
+            break;
+        }
         case MSR_IA32_SYSENTER_CS:
-            regs->eax = vmcb->sysenter_cs;
+            msr_content = vmcb->sysenter_cs;
             break;
         case MSR_IA32_SYSENTER_ESP: 
-            regs->eax = vmcb->sysenter_esp;
+            msr_content = vmcb->sysenter_esp;
             break;
         case MSR_IA32_SYSENTER_EIP:     
-            regs->eax = vmcb->sysenter_eip;
+            msr_content = vmcb->sysenter_eip;
             break;
-        case MSR_IA32_TIME_STAMP_COUNTER:
-            __asm__ __volatile__("rdtsc" : "=a" (regs->eax), "=d" (regs->edx));
-            tsc_sum = regs->edx;
-            tsc_sum = (tsc_sum << 32) + regs->eax;
-            tsc_sum += (int64_t) vmcb->tsc_offset;
-            regs->eax = tsc_sum & 0xFFFFFFFF;
-            regs->edx = (tsc_sum >> 32) & 0xFFFFFFFF;
+        case MSR_IA32_APICBASE:
+            msr_content = VLAPIC(v) ? VLAPIC(v)->apic_base_msr : 0;
             break;
         default:
             if (long_mode_do_msr_read(regs))
@@ -1733,21 +1746,30 @@ static inline void svm_do_msr_access(struct vcpu *v, struct cpu_user_regs *regs)
             rdmsr_safe(regs->ecx, regs->eax, regs->edx);
             break;
         }
+        regs->eax = msr_content & 0xFFFFFFFF;
+        regs->edx = msr_content >> 32;
     }
     else
     {
         inst_len = __get_instruction_length(vmcb, INSTR_WRMSR, NULL);
+        msr_content = (regs->eax & 0xFFFFFFFF) | ((u64)regs->edx << 32);
 
         switch (regs->ecx)
         {
+        case MSR_IA32_TIME_STAMP_COUNTER:
+            svm_set_guest_time(v, msr_content);
+            break;
         case MSR_IA32_SYSENTER_CS:
-            vmcb->sysenter_cs = regs->eax;
+            vmcb->sysenter_cs = msr_content;
             break;
         case MSR_IA32_SYSENTER_ESP: 
-            vmcb->sysenter_esp = regs->eax;
+            vmcb->sysenter_esp = msr_content;
             break;
         case MSR_IA32_SYSENTER_EIP:     
-            vmcb->sysenter_eip = regs->eax;
+            vmcb->sysenter_eip = msr_content;
+            break;
+        case MSR_IA32_APICBASE:
+            vlapic_msr_set(VLAPIC(v), msr_content);
             break;
         default:
             long_mode_do_msr_write(regs);
index eeca39ee68bd4e203de72b030cd2c011bdc12c23..2fb45896f5f31a63ca3fc26b430944a09c016b2b 100644 (file)
@@ -467,6 +467,8 @@ void svm_do_launch(struct vcpu *v)
     v->arch.hvm_svm.injecting_event  = 0;
     v->arch.hvm_svm.saved_irq_vector = -1;
 
+    svm_set_guest_time(v, 0);
+       
     if (svm_dbg_on)
         svm_dump_vmcb(__func__, vmcb);
 }
@@ -494,16 +496,17 @@ void svm_do_resume(struct vcpu *v)
     struct hvm_virpit *vpit = &d->arch.hvm_domain.vpit;
 
     svm_stts(v);
+    
+    /* pick up the elapsed PIT ticks and re-enable pit_timer */
+    if ( vpit->first_injected) {
+        svm_set_guest_time(v, v->domain->arch.hvm_domain.guest_time);
+        pickup_deactive_ticks(vpit);
+    }
 
     if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
          test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
         hvm_wait_io();
 
-    /* pick up the elapsed PIT ticks and re-enable pit_timer */
-    if ( vpit->first_injected )
-        pickup_deactive_ticks(vpit);
-    svm_set_tsc_shift(v, vpit);
-
     /* We can't resume the guest if we're waiting on I/O */
     ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
 }
index 16c7decbb2fab2faf3cfab2e24d32c860a648b1b..b74ed8c759e4ecf11c0ab608c71c88512562d0e2 100644 (file)
@@ -48,6 +48,8 @@ extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
 extern void svm_stts(struct vcpu *v); 
 extern void svm_do_launch(struct vcpu *v);
 extern void svm_do_resume(struct vcpu *v);
+extern void svm_set_guest_time(struct vcpu *v, u64 gtime);
+extern u64 svm_get_guest_time(struct vcpu *v);
 extern void arch_svm_do_resume(struct vcpu *v);
 extern int load_vmcb(struct arch_svm_struct *arch_svm, u64 phys_hsa);
 /* For debugging. Remove when no longer needed. */